# IMPORT MODULES
# Import Import Numpy, TensorFlow, Scipy, Keras
import sys
import time
import os
import numpy as np
import pandas as pd
from glob import glob
import cv2
import matplotlib.pyplot as plt
from skimage.segmentation import mark_boundaries
import pickle
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy.signal import find_peaks_cwt
import random
from sklearn.utils import shuffle
import csv
import tensorflow as tf
from tensorflow.contrib.layers import flatten
from tensorflow.examples.tutorials.mnist import input_data
import os
import numpy as np
import tensorflow as tf
import random
import math
import warnings
import pandas as pd
import cv2
import matplotlib.pyplot as plt
from tqdm import tqdm
from itertools import chain
from skimage.io import imread, imshow, imread_collection, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from sklearn.utils import shuffle
import keras
from keras.datasets import cifar10
from keras.models import Sequential,model_from_json
from keras.layers import Dense,Dropout,Activation,Flatten
from keras.layers import Convolution2D,MaxPooling2D
from keras.layers import Flatten,Lambda,ELU
from keras.optimizers import SGD,Adam,RMSprop
from keras.layers.convolutional import Conv2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.initializers import glorot_uniform
from sklearn.cross_validation import train_test_split
import scipy.misc
from matplotlib.pyplot import imshow
%matplotlib inline
from IPython.display import SVG
import json
from keras.models import Sequential, model_from_json
print('Import Modules')
# LOAD DATA
# Change Directory (data lives one level above this notebook)
os.chdir('..')
# LOAD DATA
# Find Files: every image under Data/<Split>-<Group>/ whose extension
# matches *.*p*g (jpg/jpeg/png-style names).
all_paths = pd.DataFrame(dict(Path = glob(os.path.join('Data','*', '*.*p*g'))))
# Parent folder is named "<Split>-<Group>", e.g. "Train-Color".
# NOTE(review): splitting on '/' assumes POSIX paths — os.sep would be portable.
all_paths['Split'] = all_paths['Path'].map(lambda x: x.split('/')[-2].split('-')[0])
all_paths['Group'] = all_paths['Path'].map(lambda x: x.split('/')[-2].split('-')[-1])
# First four underscore-separated tokens of the basename identify one sample.
all_paths['ID'] = all_paths['Path'].map(lambda x: '_'.join(os.path.splitext(os.path.basename(x))[0].split('_')[:4]))
# LOAD DATA
# Display Samples
all_paths.sample(5)
# LOAD DATA
# Pivot so each row is one (ID, Split) with a column per Group (e.g. Color/Label).
group_df = all_paths.pivot_table(values = 'Path', columns = 'Group', \
aggfunc = 'first', index = ['ID', 'Split']).reset_index()
# LOAD DATA
# Display Samples
group_df.sample(5)
# LOAD DATA
# Load Training Data Files.
# FIX: reset_index(drop=True) — query() keeps the original (gappy) index,
# so the later training_data_files['Color'][i] lookups for i = 0..n-1 could
# raise KeyError or fetch the wrong rows. A fresh 0..n-1 index makes the
# downstream positional-style indexing safe.
training_data_files = group_df.query('Split=="Train"').reset_index(drop=True)
data_size = training_data_files.shape[0]
print('Size of Training Data', data_size)
# EXPLORE DATA
# Visual sanity check: display 20 (image, label) pairs from the training set.
for i in range(10, 30):
    # Load Images.
    # FIX: use positional .iloc — query() can leave a non-contiguous index,
    # so label-based [i] indexing may KeyError or fetch the wrong row.
    input_img = cv2.imread(training_data_files['Color'].iloc[i])
    label_img = cv2.imread(training_data_files['Label'].iloc[i])
    # Crop the region of interest (rows 1200:2400, full width)
    input_img = input_img[1200:2400, :]
    label_img = label_img[1200:2400, :]
    # Convert label to grayscale.
    # NOTE(review): cv2.imread returns BGR, so COLOR_RGB2GRAY swaps the R/B
    # weights. Harmless if labels are single-channel masks — confirm.
    label_img = cv2.cvtColor(label_img, cv2.COLOR_RGB2GRAY)
    # Resize for display (width 560, height 200)
    input_img = cv2.resize(input_img, (560, 200))
    label_img = cv2.resize(label_img, (560, 200))
    # Plot image and label side by side
    fig, (axis1, axis2) = plt.subplots(1, 2, figsize=(15, 15))
    axis1.imshow(input_img)
    axis1.set_title('Image', fontsize=7.5)
    axis2.imshow(label_img)
    axis2.set_title('Label', fontsize=7.5)
# PREPROCESS DATA
# Clear Memory: drop the raw path tables — only training_data_files is needed below.
del all_paths
del group_df
# PREPROCESS DATA
# Load Input Images (training set).
# FIX: write each image straight into a preallocated uint8 array instead of
# growing a Python list of ~620M boxed ints and reshaping at the end — same
# final contents, a small fraction of the memory, no O(n) list growth.
num_train = 10000
x_train = np.empty((num_train, 144, 144, 3), dtype=np.uint8)
for i in range(num_train):
    # Load image by position (.iloc) — query() may leave a gappy index,
    # making label-based [i] indexing unsafe.
    input_img = cv2.imread(training_data_files['Color'].iloc[i])
    # Crop the region of interest
    input_img = input_img[1200:2400, :]
    # Resize to the network input size and store
    x_train[i] = cv2.resize(input_img, (144, 144))
    # Check Progress
    if i % 100 == 0:
        print('Images Loaded:', i)
# Check Size of Data (total element count, then the array shape)
print("Feature Size:", x_train.size)
print("Feature Size:", x_train.shape)
# PREPROCESS DATA
# Load Label Images (training set).
# FIX: preallocated uint8 array instead of a giant Python list (same
# rationale as the input-image loader above).
num_train = 10000
y_train = np.empty((num_train, 144, 144, 1), dtype=np.uint8)
for i in range(num_train):
    # Load label by position (.iloc) — safe against a gappy index.
    label_img = cv2.imread(training_data_files['Label'].iloc[i])
    # Crop the region of interest
    label_img = label_img[1200:2400, :]
    # Convert to grayscale.
    # NOTE(review): cv2.imread returns BGR, so COLOR_RGB2GRAY swaps R/B
    # weights — harmless if labels are masks; confirm.
    label_img = cv2.cvtColor(label_img, cv2.COLOR_RGB2GRAY)
    # Resize to the network output size and store with a channel axis
    label_img = cv2.resize(label_img, (144, 144))
    y_train[i, :, :, 0] = label_img
    # Check Progress
    if i % 100 == 0:
        print('Images Loaded:', i)
# Check Size of Data (total element count, then the array shape)
print("Feature Size:", y_train.size)
print("Feature Size:", y_train.shape)
# PREPROCESS DATA
# Load Input Images (validation set).
# NOTE(review): these are the first 2000 *training* files — they overlap the
# 10000 images trained on above, so this "validation" set leaks training
# data. Confirm whether a disjoint split was intended.
# FIX: preallocated uint8 array instead of a giant Python list.
num_valid = 2000
x_valid = np.empty((num_valid, 144, 144, 3), dtype=np.uint8)
for i in range(num_valid):
    # Load image by position (.iloc) — safe against a gappy index.
    input_img = cv2.imread(training_data_files['Color'].iloc[i])
    # Crop the region of interest
    input_img = input_img[1200:2400, :]
    # Resize to the network input size and store
    x_valid[i] = cv2.resize(input_img, (144, 144))
    # Check Progress
    if i % 100 == 0:
        print('Images Loaded:', i)
# Check Size of Data (total element count, then the array shape)
print("Feature Size:", x_valid.size)
print("Feature Size:", x_valid.shape)
# PREPROCESS DATA
# Load Label Images (validation set — same first-2000 overlap as x_valid).
# FIX: preallocated uint8 array instead of a giant Python list.
num_valid = 2000
y_valid = np.empty((num_valid, 144, 144, 1), dtype=np.uint8)
for i in range(num_valid):
    # Load label by position (.iloc) — safe against a gappy index.
    label_img = cv2.imread(training_data_files['Label'].iloc[i])
    # Crop the region of interest
    label_img = label_img[1200:2400, :]
    # Convert to grayscale.
    # NOTE(review): COLOR_RGB2GRAY on a BGR image swaps R/B weights —
    # harmless if labels are masks; confirm.
    label_img = cv2.cvtColor(label_img, cv2.COLOR_RGB2GRAY)
    # Resize to the network output size and store with a channel axis
    label_img = cv2.resize(label_img, (144, 144))
    y_valid[i, :, :, 0] = label_img
    # Check Progress
    if i % 100 == 0:
        print('Images Loaded:', i)
# Check Size of Data (total element count, then the array shape)
print("Feature Size:", y_valid.size)
print("Feature Size:", y_valid.shape)
# PREPROCESS DATA
# Clear Memory: the file-path table is no longer needed — all pixel data
# now lives in the x_/y_ arrays.
del training_data_files
# CREATE MODEL
# Define a Function for Convolutional Block
def convolutional_block(input_tensor, depth, kernel, strides=(1, 1), padding="SAME", batchnorm = False):
    """ReLU convolution, optionally followed by batch normalization.

    Args:
        input_tensor: 4-D input tensor.
        depth: number of output filters.
        kernel: convolution kernel size.
        strides: convolution strides (default (1, 1)).
        padding: padding mode (default "SAME").
        batchnorm: when True, apply batch normalization after the conv.

    Returns:
        The activated (and optionally normalized) output tensor.
    """
    conv = tf.layers.conv2d(
        input_tensor,
        filters=depth,
        kernel_size=kernel,
        strides=strides,
        padding=padding,
        activation=tf.nn.relu,
    )
    if not batchnorm:
        return conv
    # training=False: batch norm always uses its moving statistics here.
    return tf.layers.batch_normalization(conv, training=False)
# CREATE MODEL
# Define a Function for Deconvolutional Block
def deconvolutional_block(input_tensor, filter_size, output_size, \
out_channels, in_channels, \
strides = [1, 1, 1, 1], name = False):
    """Transposed convolution (upsampling) block.

    Args:
        input_tensor: 4-D input tensor with `in_channels` channels.
        filter_size: square kernel size of the transposed conv.
        output_size: spatial height/width of the output feature map.
        out_channels: number of output channels.
        in_channels: number of input channels.
        strides: NHWC strides for the transposed conv.
        name: when truthy, name the output op 'y_pred' (used for the
            network head so it can be fetched after checkpoint restore).

    Returns:
        The upsampled output tensor.
    """
    batch = tf.shape(input_tensor)[0]
    target_shape = tf.stack([batch, output_size, output_size, out_channels])
    # conv2d_transpose filter layout is [h, w, out_channels, in_channels].
    kernel = tf.Variable(
        tf.truncated_normal([filter_size, filter_size, out_channels, in_channels], stddev=0.05))
    op_name = 'y_pred' if name else None
    return tf.nn.conv2d_transpose(input_tensor, kernel, target_shape, strides,
                                  padding='SAME', name=op_name)
# CREATE MODEL
# Define a Function for Architecture
def UNet(x, n_filters):
    """U-Net encoder/decoder for 144x144 inputs.

    Args:
        x: input tensor, shape [batch, 144, 144, 3].
        n_filters: channel-width multiplier (16*n_filters at the top level).

    Returns:
        y_pred: single-channel output, shape [batch, 144, 144, 1],
        produced by an op named 'y_pred'.
    """
    # --- Encoder: two 3x3 ReLU convs, then a 2x2 max-pool, per scale ---
    c1_a = convolutional_block(x, 16 * n_filters, 3)           # 144x144
    c1 = convolutional_block(c1_a, 16 * n_filters, 3)
    c1 = tf.nn.max_pool(value=c1,
                        ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1],
                        padding='SAME')                        # -> 72x72
    c2_a = convolutional_block(c1, 32 * n_filters, 3)
    c2 = convolutional_block(c2_a, 32 * n_filters, 3)
    c2 = tf.nn.max_pool(value=c2,
                        ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1],
                        padding='SAME')                        # -> 36x36
    c3_a = convolutional_block(c2, 64 * n_filters, 3)
    c3 = convolutional_block(c3_a, 64 * n_filters, 3)
    c3 = tf.nn.max_pool(value=c3,
                        ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1],
                        padding='SAME')                        # -> 18x18
    c4_a = convolutional_block(c3, 128 * n_filters, 3)
    c4 = convolutional_block(c4_a, 128 * n_filters, 3)
    c4 = tf.nn.max_pool(value=c4,
                        ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1],
                        padding='SAME')                        # -> 9x9
    # --- Bottleneck ---
    c5_a = convolutional_block(c4, 256 * n_filters, 3)
    c5 = convolutional_block(c5_a, 256 * n_filters, 3)
    # --- Decoder: upsample, concat the skip connection, two convs ---
    d1 = deconvolutional_block(c5, 2, 18, 128 * n_filters, 256 * n_filters, strides=[1, 2, 2, 1])
    cat1 = tf.concat([d1, c4_a], 3)
    cat1 = convolutional_block(cat1, 128 * n_filters, 3)
    cat1 = convolutional_block(cat1, 128 * n_filters, 3)
    d2 = deconvolutional_block(cat1, 2, 36, 64 * n_filters, 128 * n_filters, strides=[1, 2, 2, 1])
    cat2 = tf.concat([d2, c3_a], 3)
    cat2 = convolutional_block(cat2, 64 * n_filters, 3)
    cat2 = convolutional_block(cat2, 64 * n_filters, 3)
    d3 = deconvolutional_block(cat2, 2, 72, 32 * n_filters, 64 * n_filters, strides=[1, 2, 2, 1])
    cat3 = tf.concat([d3, c2_a], 3)
    cat3 = convolutional_block(cat3, 32 * n_filters, 3)
    cat3 = convolutional_block(cat3, 32 * n_filters, 3)
    d4 = deconvolutional_block(cat3, 2, 144, 16 * n_filters, 32 * n_filters, strides=[1, 2, 2, 1])
    cat4 = tf.concat([d4, c1_a], 3)
    cat4 = convolutional_block(cat4, 16 * n_filters, 3)
    cat4 = convolutional_block(cat4, 16 * n_filters, 3)
    # FIX: the output head must consume cat4 — the original fed d4, which
    # made the cat4 convs dead code and bypassed the top-level skip
    # connection entirely (cat4 was computed and then discarded).
    y_pred = deconvolutional_block(cat4, 1, 144, 1, 16 * n_filters, name = True)
    return y_pred
# CONFIGURE GPU
# Configure session options; allow_soft_placement lets ops fall back to CPU
# when no GPU kernel is available.
config=tf.ConfigProto(allow_soft_placement=True)
# Best-fit-with-coalescing GPU memory allocator.
config.gpu_options.allocator_type='BFC'
# Cap this process at 80% of GPU memory so other processes can coexist.
config.gpu_options.per_process_gpu_memory_fraction=0.80
# TRAIN
# Set Placeholder: x = input images, y = ground-truth label maps.
x = tf.placeholder(tf.float32, shape=[None, 144, 144, 3], name = 'x')
y = tf.placeholder(tf.float32, [None, 144, 144, 1], name = 'y')
print("Setting Placeholder")
# TRAIN
# Set Training Pipeline
rate = 0.00001  # Adam learning rate
batch_size = 8
epochs = 5000
y_pred = UNet(x,2)  # n_filters = 2
# Pixel-wise mean-squared error between prediction and label map.
loss_operation = tf.losses.mean_squared_error(y, y_pred)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
print("Setting Training Pipeline")
# TRAIN
# Initialize Evaluation.
# NOTE(review): argmax over axis 1 of a [batch,144,144,1] regression output
# is not a meaningful accuracy for this task; these two ops are kept for
# compatibility but are unused below.
correct_prediction = tf.equal(tf.argmax(y_pred,1),tf.argmax(y,1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Define Evaluation
def evaluate(x_data, y_data):
    """Return the example-weighted mean loss over (x_data, y_data).

    FIX: the original ran training_operation during evaluation (updating the
    weights on the evaluation set) and accumulated the 2-element sess.run
    result *list*, so `total + list` raised TypeError on the first batch.
    Evaluation now runs only the loss op.
    """
    num_examples = len(x_data)
    total_loss = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, batch_size):
        batch_x = x_data[offset:offset + batch_size]
        batch_y = y_data[offset:offset + batch_size]
        loss = sess.run(loss_operation, feed_dict={x: batch_x, y: batch_y})
        # Weight by actual batch length so a short final batch counts correctly.
        total_loss += loss * len(batch_x)
    return total_loss / num_examples
# TRAIN
# Initialize Saver
saver = tf.train.Saver()
# Use Session to Train Model and save the final checkpoint.
# FIX: pass the GPU `config` built above — the original opened the session
# without it, so the allocator/memory-fraction settings were never applied.
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(x_train)
    print("Training...")
    for i in range(epochs):
        # Reshuffle each epoch so batch composition differs between epochs.
        x_train, y_train = shuffle(x_train, y_train)
        for offset in range(0, num_examples, batch_size):
            end = offset + batch_size
            batch_x, batch_y = x_train[offset:end], y_train[offset:end]
            loss_value, _ = sess.run([loss_operation, training_operation],
                                     feed_dict={x: batch_x, y: batch_y})
        # loss_value is the last batch's loss, not an epoch average.
        print("Epoch {} ...".format(i+1))
        print("Training Loss = {:.3f}".format(loss_value))
    saver.save(sess,'./Model-Tensorflow/unet')
    print("Model Saved")
# TEST
# Define a Function to Convert Label Image to Binary Image
def threshold_labels(img, thresh):
    """Binarize a 2-D image: pixels strictly greater than `thresh` -> 255,
    all others -> 0.

    Returns a float64 array of the same 2-D shape (matching the original
    np.zeros-based implementation).

    FIX: replaced the per-pixel double loop with one vectorized comparison —
    identical output, orders of magnitude faster on 144x144+ images.
    """
    return np.where(img > thresh, 255.0, 0.0)
# Evaluate the Model on Random Images
import time  # already imported at the top of the file; kept for notebook-cell independence
# Use Session to Infer.
# FIX: pass the GPU `config` built above — the original ignored it here too.
with tf.Session(config=config) as sess:
    saver.restore(sess,'Model-Tensorflow/unet')
    for i in range(100, 200):
        # Predict on validation sample i, timing the forward pass only.
        original_image = x_valid[i]
        img = x_valid[i]
        label_image = y_valid[i]
        start_time = time.time()
        original_image = np.reshape(original_image, [-1, 144, 144, 3])
        test_data = {x: original_image}
        test_mask = sess.run([y_pred], feed_dict=test_data)
        test_mask = np.reshape(test_mask, (144, 144))
        end_time = time.time()
        print('Computation Time:', end_time - start_time)
        # NOTE(review): threshold 100 is applied to the raw regression
        # output; confirm the trained output range makes this sensible.
        binary_image = threshold_labels(test_mask, 100)
        # Upscale for display (height 200, width 560)
        img = resize(img, (200, 560), anti_aliasing=True)
        label_image = resize(label_image, (200, 560), anti_aliasing=True)
        binary_image = resize(binary_image, (200, 560), anti_aliasing=True)
        # Plot input / label / thresholded prediction side by side
        fig, (axis1, axis2, axis3) = plt.subplots(1, 3, figsize = (15,15))
        axis1.imshow(img[:,:,0], cmap = 'jet')
        axis1.set_title('Input Image')
        axis2.imshow(label_image[:,:,0])
        axis2.set_title('Label')
        axis3.imshow(binary_image)
        axis3.set_title('Binary Prediction')